Map/unmap IO ports: when port access is permitted or denied for a domain, also map or unmap the backing pages of the legacy I/O port space in its guest physical address space.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
struct domain *d;
unsigned int fp = op->u.ioport_permission.first_port;
unsigned int np = op->u.ioport_permission.nr_ports;
+ unsigned int lp = fp + np - 1;
ret = -ESRCH;
d = find_domain_by_id(op->u.ioport_permission.domain);
ret = 0;
else {
if (op->u.ioport_permission.allow_access)
- ret = ioports_permit_access(d, fp, fp + np - 1);
+ ret = ioports_permit_access(d, fp, lp);
else
- ret = ioports_deny_access(d, fp, fp + np - 1);
+ ret = ioports_deny_access(d, fp, lp);
}
put_domain(d);
} else {
#ifndef CONFIG_XEN_IA64_DOM0_VP
/* Dom0 maps legacy mmio in first MB. */
- MAKE_MD(EFI_LOADER_DATA,EFI_MEMORY_WB,0*MB,1*MB, 1);
- MAKE_MD(EFI_CONVENTIONAL_MEMORY,EFI_MEMORY_WB,HYPERCALL_END,maxmem, 1);
+ MAKE_MD(EFI_LOADER_DATA, EFI_MEMORY_WB, 0*MB, 1*MB, 1);
+ MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
+ HYPERCALL_END, maxmem, 1);
#endif
- /* hypercall patches live here, masquerade as reserved PAL memory */
- MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB|EFI_MEMORY_RUNTIME,HYPERCALL_START,HYPERCALL_END, 1);
- /* Create a dummy entry for IO ports, so that IO accesses are
- trapped by Xen. */
- MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE,EFI_MEMORY_UC,
- 0x00000ffffc000000, 0x00000fffffffffff, 1);
+ /* hypercall patches live here, masquerade as reserved
+ PAL memory */
+ MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
+ HYPERCALL_START, HYPERCALL_END, 1);
+ /* Create an entry for IO ports. */
+ MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
+ IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE, 1);
MAKE_MD(EFI_RESERVED_TYPE,0,0,0,0);
}
unsigned long old_mfn, unsigned long new_mfn);
#endif
+extern unsigned long ia64_iobase;
+
static struct domain *dom_xen, *dom_io;
// followings are stolen from arch_init_memory() @ xen/arch/x86/mm.c
__assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable);
}
+/* Grant domain d access to I/O ports [fp, lp]: record the range in the
+   domain's ioport rangeset, then map the machine I/O port pages backing
+   that range into d's guest physical space at IO_PORTS_PADDR, uncached.
+   Returns 0 on success or the rangeset error code (no pages are mapped
+   in that case). */
+int
+ioports_permit_access(struct domain *d, unsigned long fp, unsigned long lp)
+{
+ int ret;
+ unsigned long off;
+ unsigned long fp_offset;
+ unsigned long lp_offset;
+
+ /* Record the permission first; bail out without mapping on failure. */
+ ret = rangeset_add_range(d->arch.ioport_caps, fp, lp);
+ if (ret != 0)
+ return ret;
+
+ /* Ports are sparse-encoded into the I/O space before paging.
+    NOTE(review): under the Linux convention PAGE_MASK == ~(PAGE_SIZE-1),
+    so `& ~PAGE_MASK` keeps only the offset-within-page bits — confirm
+    this is intended rather than `& PAGE_MASK` (round down to the page
+    containing fp). */
+ fp_offset = IO_SPACE_SPARSE_ENCODING(fp) & ~PAGE_MASK;
+ lp_offset = PAGE_ALIGN(IO_SPACE_SPARSE_ENCODING(lp));
+
+ /* NOTE(review): PAGE_ALIGN rounds up, so with `<=` the loop's last
+    iteration maps one page past the page containing lp whenever the
+    encoding of lp is not already page-aligned — confirm whether that
+    extra page is wanted. */
+ for (off = fp_offset; off <= lp_offset; off += PAGE_SIZE)
+ __assign_domain_page(d, IO_PORTS_PADDR + off,
+ ia64_iobase + off, ASSIGN_nocache);
+
+ return 0;
+}
+
+/* Revoke domain d's access to I/O ports [fp, lp]: remove the range from
+   the domain's ioport rangeset, then tear down the guest-physical
+   mappings of the backing I/O port pages and flush the vTLB.
+   Returns 0 on success or the rangeset error code (mappings are left
+   untouched in that case). */
+int
+ioports_deny_access(struct domain *d, unsigned long fp, unsigned long lp)
+{
+ int ret;
+ struct mm_struct *mm = &d->arch.mm;
+ unsigned long off;
+ unsigned long fp_offset;
+ unsigned long lp_offset;
+
+ /* Drop the permission first; bail out without unmapping on failure. */
+ ret = rangeset_remove_range(d->arch.ioport_caps, fp, lp);
+ if (ret != 0)
+ return ret;
+
+ /* Same page-range computation as ioports_permit_access; the two must
+    stay in sync so every page mapped there is cleared here.
+    NOTE(review): `& ~PAGE_MASK` keeps offset-within-page bits and the
+    `<=` bound against a rounded-up PAGE_ALIGN value touches one page
+    past the page containing lp — verify both, as in the permit path. */
+ fp_offset = IO_SPACE_SPARSE_ENCODING(fp) & ~PAGE_MASK;
+ lp_offset = PAGE_ALIGN(IO_SPACE_SPARSE_ENCODING(lp));
+
+ for (off = fp_offset; off <= lp_offset; off += PAGE_SIZE) {
+ unsigned long mpaddr = IO_PORTS_PADDR + off;
+ volatile pte_t *pte;
+ pte_t old_pte;
+
+ /* Every page in the range was mapped by ioports_permit_access,
+    so a missing or empty PTE here indicates corruption. */
+ pte = lookup_noalloc_domain_pte_none(d, mpaddr);
+ BUG_ON(pte == NULL);
+ BUG_ON(pte_none(*pte));
+
+ // clear pte (the previous PTE value is currently unused)
+ old_pte = ptep_get_and_clear(mm, mpaddr, pte);
+ }
+ /* Flush once after all PTEs are cleared so stale translations of the
+    just-unmapped pages cannot linger in any vCPU's TLB. */
+ domain_flush_vtlb_all();
+ return 0;
+}
+
#ifdef CONFIG_XEN_IA64_DOM0_VP
static void
assign_domain_same_page(struct domain *d,
#include <asm/uaccess.h> /* for KERNEL_DS */
#include <asm/pgtable.h>
+/* Guest physical address of IO ports space. */
+#define IO_PORTS_PADDR 0x00000ffffc000000UL
+/* Size of that space: 0x4000000 bytes = 64MB of sparse-encoded ports. */
+#define IO_PORTS_SIZE 0x0000000004000000UL
+
#endif /* __ASM_DOMAIN_H__ */
/*
#ifndef __IA64_IOCAP_H__
#define __IA64_IOCAP_H__
-#define ioports_permit_access(d, s, e) \
-	rangeset_add_range((d)->arch.ioport_caps, s, e)
-#define ioports_deny_access(d, s, e) \
-	rangeset_remove_range((d)->arch.ioport_caps, s, e)
+/* Grant/revoke domain access to the I/O port range [s, e].  These were
+   thin rangeset macros; they are now real functions because, on top of
+   updating the rangeset, they also map/unmap the backing pages of the
+   I/O port space in the domain's guest physical address space. */
+extern int ioports_permit_access(struct domain *d,
+ unsigned long s, unsigned long e);
+extern int ioports_deny_access(struct domain *d,
+ unsigned long s, unsigned long e);
+
#define ioports_access_permitted(d, s, e) \
	rangeset_contains_range((d)->arch.ioport_caps, s, e)